const runtime.pageSize

85 uses
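
Two expressions dominate the listing below: the constant's own definition, pageSize = 1 << gc.PageShift (malloc.go#L117), and byte-size arithmetic of the form npages*pageSize. The following is a minimal, standalone sketch of that relationship, assuming gc.PageShift is 13 (8 KiB runtime pages); it mirrors malloc.go#L117-121 but is not the runtime's own code.

	package main

	import "fmt"

	const (
		pageShift = 13             // assumed value of gc.PageShift
		pageSize  = 1 << pageShift // 8192 bytes per runtime page
		pageMask  = pageSize - 1   // selects the offset within a page
	)

	// spanBytes mirrors the pervasive s.npages*pageSize pattern: a span
	// that owns npages runtime pages covers exactly npages*pageSize bytes.
	func spanBytes(npages uintptr) uintptr {
		return npages * pageSize
	}

	func main() {
		fmt.Println(pageSize)     // 8192
		fmt.Println(spanBytes(4)) // 32768 bytes for a 4-page span
	}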

	runtime (current package)
		arena.go#L196: 	userArenaChunkPages = userArenaChunkBytes / pageSize
		arena.go#L206: 	if userArenaChunkPages*pageSize != userArenaChunkBytes {
		arena.go#L857: 	if s.npages*pageSize != userArenaChunkBytes {
		arena.go#L878: 	sysFault(unsafe.Pointer(s.base()), s.npages*pageSize)
		arena.go#L883: 	gcController.heapInUse.add(-int64(s.npages * pageSize))
		arena.go#L896: 	atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
		arena.go#L897: 	atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
		arena.go#L938: 	if s.npages*pageSize != userArenaChunkBytes {
		heapdump.go#L475: var freemark [pageSize / 8]bool
		malloc.go#L117: 	pageSize      = 1 << gc.PageShift
		malloc.go#L118: 	pageMask      = pageSize - 1
		malloc.go#L121: 	_PageSize              = pageSize
		malloc.go#L260: 	pagesPerArena = heapArenaBytes / pageSize
		malloc.go#L1943: 		if align > pageSize {
		mbitmap.go#L553: 		return heapBitsSlice(span.base(), pageSize, span.elemsize)
		mbitmap.go#L555: 	return heapBitsSlice(span.base(), span.npages*pageSize, span.elemsize)
		mbitmap.go#L586: 	hbitsBase, _ := spanHeapBitsRange(span.base(), span.npages*pageSize, span.elemsize)
		mbitmap.go#L653: 	dstBase, _ := spanHeapBitsRange(span.base(), pageSize, span.elemsize)
		mcache.go#L214: 	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
		mcache.go#L222: 	if size+pageSize < size {
		mcache.go#L233: 	deductSweepCredit(npages*pageSize, npages)
		mcache.go#L243: 	atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
		mcache.go#L248: 	gcController.totalAlloc.Add(int64(npages * pageSize))
		mcache.go#L251: 	gcController.update(int64(s.npages*pageSize), 0)
		mcentral.go#L84: 	spanBytes := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()]) * pageSize
		mgcscavenge.go#L130: 	maxPagesPerPhysPage = maxPhysPageSize / pageSize
		mgcscavenge.go#L739: 	maxPages := max / pageSize
		mgcscavenge.go#L740: 	if max%pageSize != 0 {
		mgcscavenge.go#L749: 	minPages := physPageSize / pageSize
		mgcscavenge.go#L763: 			addr := chunkBase(ci) + uintptr(base)*pageSize
		mgcscavenge.go#L778: 				sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
		mgcscavenge.go#L782: 				nbytes := int64(npages * pageSize)
		mgcscavenge.go#L805: 			return uintptr(npages) * pageSize
		mgcscavenge.go#L962: 	if physHugePageSize > pageSize && physHugePageSize > physPageSize {
		mgcscavenge.go#L969: 		pagesPerHugePage := physHugePageSize / pageSize
		mgcscavenge.go#L1104: 		newSearchAddr := chunkBase(i) + pallocChunkBytes - pageSize
		mgcscavenge.go#L1153: 	addr := chunkBase(ci) + uintptr(page+npages-1)*pageSize
		mgcsweep.go#L527: 		trace.GCSweepSpan(s.npages * pageSize)
		mgcsweep.go#L999: 		if heapDistance < pageSize {
		mgcsweep.go#L1001: 			heapDistance = pageSize
		mgcwork.go#L29: 	if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
		mgcwork.go#L109: 	ptrBuf *[pageSize / goarch.PtrSize]uintptr
		mgcwork.go#L436: 				s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
		mheap.go#L726: 	return ha.spans[(p/pageSize)%pagesPerArena]
		mheap.go#L737: 	return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
		mheap.go#L764: 	pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
		mheap.go#L765: 	pageMask = byte(1 << ((p / pageSize) % 8))
		mheap.go#L976: 		trace.GCSweepSpan((n0 - nFreed) * pageSize)
		mheap.go#L1049: 	p := base / pageSize
		mheap.go#L1055: 			ai = arenaIndex(base + n*pageSize)
		mheap.go#L1097: 		arenaLimit := arenaBase + npage*pageSize
		mheap.go#L1120: 		npage -= (arenaLimit - arenaBase) / pageSize
		mheap.go#L1233: 	needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize
		mheap.go#L1267: 		extraPages := physPageSize / pageSize
		mheap.go#L1396: 		valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize)
		mheap.go#L1400: 	nbytes := npages * pageSize
		mheap.go#L1446: 	nbytes := npages * pageSize
		mheap.go#L1555: 	ask := alignUp(npage, pallocChunkPages) * pageSize
		mheap.go#L1729: 	nbytes := s.npages * pageSize
		mheap.go#L2026: 	arenaPage := (s.base() / pageSize) % pagesPerArena
		mheap.go#L2034: 	arenaPage := (s.base() / pageSize) % pagesPerArena
		mpagealloc.go#L60: 	pallocChunkBytes    = pallocChunkPages * pageSize
		mpagealloc.go#L121: 	return uint(p % pallocChunkBytes / pageSize)
		mpagealloc.go#L431: 	p.update(base, size/pageSize, true, false)
		mpagealloc.go#L492: 	limit := base + npages*pageSize - 1
		mpagealloc.go#L576: 	limit := base + npages*pageSize - 1
		mpagealloc.go#L605: 	return uintptr(scav) * pageSize
		mpagealloc.go#L775: 			foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)
		mpagealloc.go#L813: 			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
		mpagealloc.go#L854: 	addr := chunkBase(ci) + uintptr(j)*pageSize
		mpagealloc.go#L858: 	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
		mpagealloc.go#L897: 			addr = chunkBase(i) + uintptr(j)*pageSize
		mpagealloc.go#L898: 			searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
		mpagealloc.go#L943: 	limit := base + npages*pageSize - 1
		mpagecache.go#L46: 		return c.base + i*pageSize, uintptr(scav) * pageSize
		mpagecache.go#L66: 	return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
		mpagecache.go#L138: 			base:  chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
		mpagecache.go#L155: 			base:  alignDown(addr, 64*pageSize),
		mpagecache.go#L181: 	p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
		msize.go#L31: 	reqSize += pageSize - 1
		msize.go#L35: 	return reqSize &^ (pageSize - 1)
		traceallocfree.go#L41: 	w.varint(uint64(pageSize))
		traceallocfree.go#L109: 	return traceArg(uint64(s.base())-trace.minPageHeapAddr) / pageSize
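
Two other patterns recur in the listing: rounding a size request up to a whole number of pages (msize.go#L31-35) and locating a page's slot within its heap arena (mheap.go#L726). The sketch below reproduces both under stated assumptions; pageSize and heapArenaBytes are hard-coded here for illustration (8 KiB pages, 64 MiB arenas), whereas the runtime derives them from gc.PageShift and heapArenaBytes respectively. This is not the runtime's own code.

	package main

	import "fmt"

	const (
		pageSize       = 8192     // assumes gc.PageShift = 13
		heapArenaBytes = 64 << 20 // assumed 64 MiB heap arenas
		pagesPerArena  = heapArenaBytes / pageSize // cf. malloc.go#L260
	)

	// roundUpToPage mirrors msize.go#L31-35: add pageSize-1, then clear
	// the low-order bits so the result is a multiple of pageSize.
	func roundUpToPage(reqSize uintptr) uintptr {
		reqSize += pageSize - 1
		return reqSize &^ (pageSize - 1)
	}

	// arenaPageIndex mirrors the span-lookup index in mheap.go#L726:
	// the page slot within its arena that address p falls into.
	func arenaPageIndex(p uintptr) uintptr {
		return (p / pageSize) % pagesPerArena
	}

	func main() {
		fmt.Println(roundUpToPage(10000))        // 16384: rounds up to two pages
		fmt.Println(arenaPageIndex(0x12342000))  // page slot of this address in its arena
	}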